uint32_t poolid)
{
    int err = 0;
    xc_cpupoolinfo_t *info = NULL;
    int local_size;
    int cpumap_size;
    int size;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, local);

    local_size = get_cpumap_size(xch);
    if (!local_size)
    {   /* NOTE(review): opening brace restored — it was dropped from the diff context */
        PERROR("Could not get number of cpus");
        return NULL;
    }

    /* Hypercall-safe buffer replaces the old alloca()+lock_pages() pattern. */
    local = xc_hypercall_buffer_alloc(xch, local, local_size);
    if ( local == NULL ) {
        PERROR("Could not allocate locked memory for xc_cpupool_getinfo");
        return NULL;
    }

    /* Round the byte-sized cpumap up to a whole number of cpumap words. */
    cpumap_size = (local_size + sizeof(*info->cpumap) - 1) / sizeof(*info->cpumap);
    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * sizeof(*info->cpumap);

    sysctl.cmd = XEN_SYSCTL_cpupool_op;
    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
    sysctl.u.cpupool_op.cpupool_id = poolid;
    xc_set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
    sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;

    err = do_sysctl_save(xch, &sysctl);
    if ( err < 0 )
        goto out;

    /* Only allocate the caller-visible result once the sysctl succeeded. */
    info = malloc(size);
    if ( !info )
        goto out;

    memset(info, 0, size);
    info->cpumap_size = local_size * 8;
    info->cpumap = (uint64_t *)(info + 1); /* cpumap lives in the same allocation */
    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
    info->sched_id = sysctl.u.cpupool_op.sched_id;
    info->n_dom = sysctl.u.cpupool_op.n_dom;
    bitmap_byte_to_64(info->cpumap, local, local_size * 8);

out:
    xc_hypercall_buffer_free(xch, local);

    return info;
}
/* Return a freshly-allocated 64-bit bitmap of free (unassigned) cpus,
 * or NULL on error; *cpusize is set to the cpumap size in bytes. */
uint64_t * xc_cpupool_freeinfo(xc_interface *xch,
                        int *cpusize)
{
    int err = -1;
    uint64_t *cpumap = NULL;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, local);

    *cpusize = get_cpumap_size(xch);
    if (*cpusize == 0)
        return NULL;

    local = xc_hypercall_buffer_alloc(xch, *cpusize);
    if ( local == NULL ) {
        PERROR("Could not allocate locked memory for xc_cpupool_freeinfo");
        return NULL;
    }

    sysctl.cmd = XEN_SYSCTL_cpupool_op;
    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
    xc_set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
    sysctl.u.cpupool_op.cpumap.nr_cpus = *cpusize * 8;

    err = do_sysctl_save(xch, &sysctl);
    if ( err < 0 )
        goto out;

    /* Allocate the result only after the hypercall succeeded. */
    cpumap = calloc((*cpusize + sizeof(*cpumap) - 1) / sizeof(*cpumap), sizeof(*cpumap));
    if (cpumap == NULL)
        goto out;

    bitmap_byte_to_64(cpumap, local, *cpusize * 8);

out:
    xc_hypercall_buffer_free(xch, local);
    return cpumap;
}
{
    int ret = 0;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(info, max_domains*sizeof(*info), XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, info) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_getdomaininfolist;
    sysctl.u.getdomaininfolist.first_domain = first_domain;
    sysctl.u.getdomaininfolist.max_domains = max_domains;
    xc_set_xen_guest_handle(sysctl.u.getdomaininfolist.buffer, info);

    if ( xc_sysctl(xch, &sysctl) < 0 )
        ret = -1;
    else
        /* On success return the number of domain records filled in. */
        ret = sysctl.u.getdomaininfolist.num_domains;

    xc_hypercall_bounce_post(xch, info);

    return ret;
}
int clear, int incremental, uint32_t *pindex)
{
    int ret;
    unsigned int nr_chars = *pnr_chars;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(buffer, nr_chars, XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, buffer) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_readconsole;
    xc_set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
    sysctl.u.readconsole.count = nr_chars;
    sysctl.u.readconsole.clear = clear;
    sysctl.u.readconsole.incremental = 0;
    /* NOTE(review): the `if ( pindex )` guard below was dropped from the diff
     * context — the stray closing brace implied it; verify against upstream. */
    if ( pindex )
    {
        sysctl.u.readconsole.index = *pindex;
        sysctl.u.readconsole.incremental = incremental;
    }

    if ( (ret = do_sysctl(xch, &sysctl)) == 0 )
    {
        /* Report back how many characters were actually read. */
        *pnr_chars = sysctl.u.readconsole.count;
        *pindex = sysctl.u.readconsole.index;
    }

    xc_hypercall_bounce_post(xch, buffer);

    return ret;
}
{
    int ret, len = strlen(keys);
    DECLARE_SYSCTL;
    /* The keys are an INPUT to the hypervisor, so bounce them in; the
     * original BOUNCE_OUT would have handed Xen an uninitialised buffer. */
    DECLARE_HYPERCALL_BOUNCE(keys, len, XC_HYPERCALL_BUFFER_BOUNCE_IN);

    if ( xc_hypercall_bounce_pre(xch, keys) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_debug_keys;
    xc_set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
    sysctl.u.debug_keys.nr_keys = len;

    ret = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, keys);

    return ret;
}
sysctl.cmd = XEN_SYSCTL_perfc_op;
sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_reset;
- set_xen_guest_handle(sysctl.u.perfc_op.desc, NULL);
- set_xen_guest_handle(sysctl.u.perfc_op.val, NULL);
+ xc_set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
+ xc_set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);
return do_sysctl(xch, &sysctl);
}
sysctl.cmd = XEN_SYSCTL_perfc_op;
sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
- set_xen_guest_handle(sysctl.u.perfc_op.desc, NULL);
- set_xen_guest_handle(sysctl.u.perfc_op.val, NULL);
+ xc_set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
+ xc_set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);
rc = do_sysctl(xch, &sysctl);
}
/* Query the hypervisor performance counters into caller-supplied
 * hypercall buffers (see DECLARE_HYPERCALL_BUFFER in the callers). */
int xc_perfc_query(xc_interface *xch,
                   struct xc_hypercall_buffer *desc,
                   struct xc_hypercall_buffer *val)
{
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(desc);
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(val);

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
    xc_set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
    xc_set_xen_guest_handle(sysctl.u.perfc_op.val, val);

    return do_sysctl(xch, &sysctl);
}
sysctl.cmd = XEN_SYSCTL_lockprof_op;
sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_reset;
- set_xen_guest_handle(sysctl.u.lockprof_op.data, NULL);
+ xc_set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);
return do_sysctl(xch, &sysctl);
}
sysctl.cmd = XEN_SYSCTL_lockprof_op;
sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
- set_xen_guest_handle(sysctl.u.lockprof_op.data, NULL);
+ xc_set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);
rc = do_sysctl(xch, &sysctl);
}
/* Query lock-profiling records into a caller-supplied hypercall buffer.
 * On entry *n_elems is the buffer capacity; on success it is updated to
 * the number of records available. */
int xc_lockprof_query(xc_interface *xch,
                      uint32_t *n_elems,
                      uint64_t *time,
                      struct xc_hypercall_buffer *data)
{
    int rc;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(data);

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
    sysctl.u.lockprof_op.max_elem = *n_elems;
    xc_set_xen_guest_handle(sysctl.u.lockprof_op.data, data);

    rc = do_sysctl(xch, &sysctl);

    /* NOTE(review): tail restored — the diff dropped everything after the
     * sysctl call; verify against upstream. */
    if ( rc == 0 )
    {
        *n_elems = sysctl.u.lockprof_op.nr_elem;
        *time = sysctl.u.lockprof_op.time;
    }

    return rc;
}
{
    int rc;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(info, max_cpus*sizeof(*info), XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, info) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_getcpuinfo;
    sysctl.u.getcpuinfo.max_cpus = max_cpus;
    xc_set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, info);

    /* nr_cpus is optional for the caller. */
    if ( nr_cpus )
        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus;

    return rc;
}
unsigned long end, uint32_t *status)
{
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int ret = -1;

    if ( !status || (end < start) )
        return -EINVAL;

    if ( xc_hypercall_bounce_pre(xch, status) )
    {
        ERROR("Could not bounce memory for xc_mark_page_online\n");
        return -EINVAL;
    }

    /* NOTE(review): sysctl.cmd assignment restored — dropped from the diff
     * context; verify against upstream. */
    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_page_online;
    sysctl.u.page_offline.end = end;
    xc_set_xen_guest_handle(sysctl.u.page_offline.status, status);

    ret = xc_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, status);

    return ret;
}
unsigned long end, uint32_t *status)
{
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int ret = -1;

    if ( !status || (end < start) )
        return -EINVAL;

    if ( xc_hypercall_bounce_pre(xch, status) )
    {
        ERROR("Could not bounce memory for xc_mark_page_offline");
        return -EINVAL;
    }

    /* NOTE(review): sysctl.cmd assignment restored — dropped from the diff
     * context; verify against upstream. */
    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_page_offline;
    sysctl.u.page_offline.end = end;
    xc_set_xen_guest_handle(sysctl.u.page_offline.status, status);

    ret = xc_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, status);

    return ret;
}
unsigned long end, uint32_t *status)
{
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int ret = -1;

    if ( !status || (end < start) )
        return -EINVAL;

    if ( xc_hypercall_bounce_pre(xch, status) )
    {
        ERROR("Could not bounce memory for xc_query_page_offline_status\n");
        return -EINVAL;
    }

    /* NOTE(review): sysctl.cmd assignment restored — dropped from the diff
     * context; verify against upstream. */
    sysctl.cmd = XEN_SYSCTL_page_offline_op;
    sysctl.u.page_offline.start = start;
    sysctl.u.page_offline.cmd = sysctl_query_page_offline;
    sysctl.u.page_offline.end = end;
    xc_set_xen_guest_handle(sysctl.u.page_offline.status, status);

    ret = xc_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, status);

    return ret;
}
int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt)
{
    DECLARE_SYSCTL;
    /* Sizes unknown until xc_pm_get_max_px */
    DECLARE_NAMED_HYPERCALL_BOUNCE(trans, &pxpt->trans_pt, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(pt, &pxpt->pt, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int max_px, ret;

    if ( !pxpt || !(pxpt->trans_pt) || !(pxpt->pt) )
        return -EINVAL; /* NOTE(review): guard body restored — dropped from diff */

    if ( (ret = xc_pm_get_max_px(xch, cpuid, &max_px)) != 0 )
        return ret;

    HYPERCALL_BOUNCE_SET_SIZE(trans, max_px * max_px * sizeof(uint64_t));
    HYPERCALL_BOUNCE_SET_SIZE(pt, max_px * sizeof(struct xc_px_val));

    /* ret is 0 from the successful call above: make sure a bounce failure
     * does not get reported as success. */
    ret = -1;
    if ( xc_hypercall_bounce_pre(xch, trans) )
        return ret;
    if ( xc_hypercall_bounce_pre(xch, pt) )
    {
        xc_hypercall_bounce_post(xch, trans);
        return ret;
    }

    /* NOTE(review): sysctl.cmd assignment restored — dropped from the diff
     * context; verify against upstream. */
    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_pxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;
    sysctl.u.get_pmstat.u.getpx.total = max_px;
    xc_set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.trans_pt, trans);
    xc_set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.pt, pt);

    ret = xc_sysctl(xch, &sysctl);
    if ( ret )
    {
        xc_hypercall_bounce_post(xch, trans);
        xc_hypercall_bounce_post(xch, pt);
        return ret;
    }

    pxpt->last = sysctl.u.get_pmstat.u.getpx.last;
    pxpt->cur = sysctl.u.get_pmstat.u.getpx.cur;

    /* BOUNCE_BOTH copies the trans/pt data back to pxpt on post. */
    xc_hypercall_bounce_post(xch, trans);
    xc_hypercall_bounce_post(xch, pt);

    return ret;
}
int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt)
{
    DECLARE_SYSCTL;
    /* Sizes unknown until xc_pm_get_max_cx */
    DECLARE_NAMED_HYPERCALL_BOUNCE(triggers, &cxpt->triggers, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(residencies, &cxpt->residencies, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    int max_cx, ret;

    if( !cxpt || !(cxpt->triggers) || !(cxpt->residencies) )
        return -EINVAL; /* NOTE(review): guard body restored — dropped from diff */

    if ( (ret = xc_pm_get_max_cx(xch, cpuid, &max_cx)) )
        goto unlock_0;

    HYPERCALL_BOUNCE_SET_SIZE(triggers, max_cx * sizeof(uint64_t));
    HYPERCALL_BOUNCE_SET_SIZE(residencies, max_cx * sizeof(uint64_t));

    /* Don't report a stale success code if a bounce fails. */
    ret = -1;
    if ( xc_hypercall_bounce_pre(xch, triggers) )
        goto unlock_0;
    if ( xc_hypercall_bounce_pre(xch, residencies) )
        goto unlock_1;

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_cxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;
    xc_set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.triggers, triggers);
    xc_set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.residencies, residencies);

    if ( (ret = xc_sysctl(xch, &sysctl)) )
        goto unlock_2;

    cxpt->nr = sysctl.u.get_pmstat.u.getcx.nr;
    cxpt->last = sysctl.u.get_pmstat.u.getcx.last;
    cxpt->cc3 = sysctl.u.get_pmstat.u.getcx.cc3;
    cxpt->cc6 = sysctl.u.get_pmstat.u.getcx.cc6;

unlock_2:
    xc_hypercall_bounce_post(xch, residencies);
unlock_1:
    xc_hypercall_bounce_post(xch, triggers);
unlock_0:
    return ret;
}
DECLARE_SYSCTL;
int ret = 0;
struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.u.get_para;
+ DECLARE_NAMED_HYPERCALL_BOUNCE(affected_cpus,
+ user_para->affected_cpus,
+ user_para->cpu_num * sizeof(uint32_t), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+ DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_frequencies,
+ user_para->scaling_available_frequencies,
+ user_para->freq_num * sizeof(uint32_t), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+ DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_governors,
+ user_para->scaling_available_governors,
+ user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
bool has_num = user_para->cpu_num &&
user_para->freq_num &&
user_para->gov_num;
- if ( (xch < 0) || !user_para )
- return -EINVAL;
-
if ( has_num )
{
if ( (!user_para->affected_cpus) ||
(!user_para->scaling_available_governors) )
return -EINVAL;
- if ( (ret = lock_pages(xch, user_para->affected_cpus,
- user_para->cpu_num * sizeof(uint32_t))) )
+ if ( xc_hypercall_bounce_pre(xch, affected_cpus) )
goto unlock_1;
- if ( (ret = lock_pages(xch, user_para->scaling_available_frequencies,
- user_para->freq_num * sizeof(uint32_t))) )
+ if ( xc_hypercall_bounce_pre(xch, scaling_available_frequencies) )
goto unlock_2;
- if ( (ret = lock_pages(xch, user_para->scaling_available_governors,
- user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char))) )
+ if ( xc_hypercall_bounce_pre(xch, scaling_available_governors) )
goto unlock_3;
- set_xen_guest_handle(sys_para->affected_cpus,
- user_para->affected_cpus);
- set_xen_guest_handle(sys_para->scaling_available_frequencies,
- user_para->scaling_available_frequencies);
- set_xen_guest_handle(sys_para->scaling_available_governors,
- user_para->scaling_available_governors);
+ xc_set_xen_guest_handle(sys_para->affected_cpus, affected_cpus);
+ xc_set_xen_guest_handle(sys_para->scaling_available_frequencies, scaling_available_frequencies);
+ xc_set_xen_guest_handle(sys_para->scaling_available_governors, scaling_available_governors);
}
sysctl.cmd = XEN_SYSCTL_pm_op;
user_para->scaling_min_freq = sys_para->scaling_min_freq;
user_para->turbo_enabled = sys_para->turbo_enabled;
- memcpy(user_para->scaling_driver,
+ memcpy(user_para->scaling_driver,
sys_para->scaling_driver, CPUFREQ_NAME_LEN);
memcpy(user_para->scaling_governor,
sys_para->scaling_governor, CPUFREQ_NAME_LEN);
}
unlock_4:
- unlock_pages(xch, user_para->scaling_available_governors,
- user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char));
+ xc_hypercall_bounce_post(xch, scaling_available_governors);
unlock_3:
- unlock_pages(xch, user_para->scaling_available_frequencies,
- user_para->freq_num * sizeof(uint32_t));
+ xc_hypercall_bounce_post(xch, scaling_available_frequencies);
unlock_2:
- unlock_pages(xch, user_para->affected_cpus,
- user_para->cpu_num * sizeof(uint32_t));
+ xc_hypercall_bounce_post(xch, affected_cpus);
unlock_1:
return ret;
}
{
    int ret = -1;
    DECLARE_HYPERCALL;
    DECLARE_HYPERCALL_BOUNCE(sysctl, sizeof(*sysctl), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    /* Set before bouncing so the copy-in carries the version. */
    sysctl->interface_version = XEN_SYSCTL_INTERFACE_VERSION;

    if ( xc_hypercall_bounce_pre(xch, sysctl) )
    {
        PERROR("Could not bounce buffer for sysctl hypercall");
        goto out1;
    }

    hypercall.op = __HYPERVISOR_sysctl;
    hypercall.arg[0] = HYPERCALL_BUFFER_AS_ARG(sysctl);

    if ( (ret = do_xen_hypercall(xch, &hypercall)) < 0 )
    {
        if ( errno == EACCES )
            /* NOTE(review): first line of this DPRINTF restored — it was
             * dropped from the diff context; verify upstream. */
            DPRINTF("sysctl operation failed -- need to"
                    " rebuild the user-space tool set?\n");
    }

    xc_hypercall_bounce_post(xch, sysctl);

out1:
    return ret;
}
int xc_tbuf_set_cpu_mask(xc_interface *xch, uint32_t mask)
{
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, bytemap);
    int ret = -1;
    uint64_t mask64 = mask;

    bytemap = xc_hypercall_buffer_alloc(xch, bytemap, sizeof(mask64));
    /* The NULL check was missing: the error block below ran unconditionally. */
    if ( bytemap == NULL )
    {
        PERROR("Could not allocate memory for xc_tbuf_set_cpu_mask hypercall");
        goto out;
    }

    sysctl.cmd = XEN_SYSCTL_tbuf_op;
    sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
    /* NOTE(review): tbuf_op.cmd assignment restored — dropped from the diff
     * context; verify against upstream. */
    sysctl.u.tbuf_op.cmd  = XEN_SYSCTL_TBUFOP_set_cpu_mask;

    bitmap_64_to_byte(bytemap, &mask64, sizeof (mask64) * 8);

    xc_set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, bytemap);
    /* bytemap is now a pointer, so sizeof(bytemap) would be the pointer
     * size, not the bitmap size — use the mask width instead. */
    sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(mask64) * 8;

    ret = do_sysctl(xch, &sysctl);

    xc_hypercall_buffer_free(xch, bytemap);

out:
    return ret;
} /* NOTE(review): closing brace restored — dropped at a diff chunk boundary */
int xc_perfc_query_number(xc_interface *xch,
int *nbr_desc,
int *nbr_val);
-/* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
- arrays. */
int xc_perfc_query(xc_interface *xch,
- xc_perfc_desc_t *desc,
- xc_perfc_val_t *val);
+ xc_hypercall_buffer_t *desc,
+ xc_hypercall_buffer_t *val);
typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
int xc_lockprof_reset(xc_interface *xch);
int xc_lockprof_query_number(xc_interface *xch,
uint32_t *n_elems);
-/* IMPORTANT: The caller is responsible for mlock()'ing the @data array. */
int xc_lockprof_query(xc_interface *xch,
uint32_t *n_elems,
uint64_t *time,
- xc_lockprof_data_t *data);
+ xc_hypercall_buffer_t *data);
/**
* Memory maps a range within one domain to a local address range. Mappings
#include <string.h>
#include <inttypes.h>
-static int lock_pages(void *addr, size_t len)
-{
- int e = 0;
-#ifndef __sun__
- e = mlock(addr, len);
-#endif
- return (e);
-}
-
-static void unlock_pages(void *addr, size_t len)
-{
-#ifndef __sun__
- munlock(addr, len);
-#endif
-}
-
int main(int argc, char *argv[])
{
xc_interface *xc_handle;
uint64_t time;
double l, b, sl, sb;
char name[60];
- xc_lockprof_data_t *data;
+ DECLARE_HYPERCALL_BUFFER(xc_lockprof_data_t, data);
if ( (argc > 2) || ((argc == 2) && (strcmp(argv[1], "-r") != 0)) )
{
}
n += 32; /* just to be sure */
- data = malloc(sizeof(*data) * n);
- if ( (data == NULL) || (lock_pages(data, sizeof(*data) * n) != 0) )
+ data = xc_hypercall_buffer_alloc(xc_handle, data, sizeof(*data) * n);
+ if ( data == NULL )
{
- fprintf(stderr, "Could not alloc or lock buffers: %d (%s)\n",
+ fprintf(stderr, "Could not allocate buffers: %d (%s)\n",
errno, strerror(errno));
return 1;
}
i = n;
- if ( xc_lockprof_query(xc_handle, &i, &time, data) != 0 )
+ if ( xc_lockprof_query(xc_handle, &i, &time, HYPERCALL_BUFFER(data)) != 0 )
{
fprintf(stderr, "Error getting profile records: %d (%s)\n",
errno, strerror(errno));
return 1;
}
- unlock_pages(data, sizeof(*data) * n);
-
if ( i > n )
{
printf("data incomplete, %d records are missing!\n\n", i - n);
printf("total locked time: %20.9fs\n", sl);
printf("total blocked time: %20.9fs\n", sb);
+ xc_hypercall_buffer_free(xc_handle, data);
+
return 0;
}
};
#undef X
-static int lock_pages(void *addr, size_t len)
-{
- int e = 0;
-#ifndef __sun__
- e = mlock(addr, len);
-#endif
- return (e);
-}
-
-static void unlock_pages(void *addr, size_t len)
-{
-#ifndef __sun__
- munlock(addr, len);
-#endif
-}
-
int main(int argc, char *argv[])
{
int i, j;
xc_interface *xc_handle;
- xc_perfc_desc_t *pcd;
- xc_perfc_val_t *pcv;
+ DECLARE_HYPERCALL_BUFFER(xc_perfc_desc_t, pcd);
+ DECLARE_HYPERCALL_BUFFER(xc_perfc_val_t, pcv);
xc_perfc_val_t *val;
int num_desc, num_val;
unsigned int sum, reset = 0, full = 0, pretty = 0;
return 1;
}
- pcd = malloc(sizeof(*pcd) * num_desc);
- pcv = malloc(sizeof(*pcv) * num_val);
+ pcd = xc_hypercall_buffer_alloc(xc_handle, pcd, sizeof(*pcd) * num_desc);
+ pcv = xc_hypercall_buffer_alloc(xc_handle, pcv, sizeof(*pcv) * num_val);
- if ( pcd == NULL
- || lock_pages(pcd, sizeof(*pcd) * num_desc) != 0
- || pcv == NULL
- || lock_pages(pcv, sizeof(*pcv) * num_val) != 0)
+ if ( pcd == NULL || pcv == NULL)
{
- fprintf(stderr, "Could not alloc or lock buffers: %d (%s)\n",
+ fprintf(stderr, "Could not allocate buffers: %d (%s)\n",
errno, strerror(errno));
exit(-1);
}
- if ( xc_perfc_query(xc_handle, pcd, pcv) != 0 )
+ if ( xc_perfc_query(xc_handle, HYPERCALL_BUFFER(pcd), HYPERCALL_BUFFER(pcv)) != 0 )
{
fprintf(stderr, "Error getting perf counter: %d (%s)\n",
errno, strerror(errno));
return 1;
}
- unlock_pages(pcd, sizeof(*pcd) * num_desc);
- unlock_pages(pcv, sizeof(*pcv) * num_val);
-
val = pcv;
for ( i = 0; i < num_desc; i++ )
{
val += pcd[i].nr_vals;
}
+ xc_hypercall_buffer_free(xc_handle, pcd);
+ xc_hypercall_buffer_free(xc_handle, pcv);
return 0;
}